In your final repo, there should be an R Markdown file that organizes all computational steps for evaluating your proposed Facial Expression Recognition framework.
This file is currently a template for running evaluation experiments. You should update it according to your code while following precisely the same structure.
Provide directories for training images. Training images and training fiducial points will be in different subfolders.
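A minimal sketch of the directory setup is below; the subfolder names images/ and points/ are assumptions and should be adjusted to your repository layout.
train_dir <- "../../data/train_set/"
train_image_dir <- paste0(train_dir, "images/")  # training images (assumed subfolder name)
train_pt_dir <- paste0(train_dir, "points/")     # fiducial points, .mat files (assumed subfolder name)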
In this chunk, we have a set of controls for the evaluation experiments.
run.cv <- TRUE # run cross-validation on the training set
sample.reweight <- TRUE # run sample reweighting in model training
K <- 5 # number of CV folds
run.feature.train <- TRUE # process features for training set
run.test <- TRUE # run evaluation on an independent test set
run.feature.test <- TRUE # process features for test set
# Train-test split (consider set.seed() here for a reproducible split)
info <- read.csv("../../data/train_set/label.csv")
n <- nrow(info)
n_train <- round(n*(4/5), 0)
train_idx <- sample(info$Index, n_train, replace = FALSE)
test_idx <- setdiff(info$Index, train_idx)
If you choose to extract features from images, for example with a Gabor filter, R will run out of memory if all images are read at once. The solution is to read and process the images in smaller batches (e.g., 100 at a time), as in the sketch below.
# n_files <- length(list.files(train_image_dir))
# image_list <- list()
# batch_size <- 100
# for(batch_start in seq(1, n_files, by = batch_size)){
#   batch_end <- min(batch_start + batch_size - 1, n_files)
#   for(i in batch_start:batch_end){
#     # readImage() is from the EBImage package
#     image_list[[i]] <- readImage(paste0(train_image_dir, sprintf("%04d", i), ".jpg"))
#   }
#   # process the current batch here, then drop it to free memory
# }
Fiducial points are stored in MATLAB format. In this step, we read them and store them in a list.
# # Function to read fiducial points; readMat() is from the R.matlab package
# # Input: index
# # Output: matrix of fiducial points corresponding to the index
# readMat.matrix <- function(index){
#   return(round(readMat(paste0(train_pt_dir, sprintf("%04d", index), ".mat"))[[1]], 0))
# }
# # Load fiducial points
# fiducial_pt_list <- lapply(1:n_files, readMat.matrix)
# save(fiducial_pt_list, file="../../output/fiducial_pt_list.RData")
load("../../output/fiducial_pt_list.RData")
The following plots show how pairwise distances between fiducial points can serve as features for facial emotion recognition.
Figure 1: pairwise distances between fiducial points.
feature.R should be the wrapper for all your feature engineering functions and options. The function feature() should have options that correspond to different scenarios for your project and produce an R object that contains the features and responses required by all the models you will evaluate later.
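Below is a minimal sketch of what feature() might look like, assuming each element of fiducial_pt_list is a matrix of point coordinates and that label.csv provides an emotion_idx column (both assumptions, not the project's exact code); it computes pairwise Euclidean distances between fiducial points.
# Sketch only: pairwise Euclidean distances between fiducial points;
# for 78 points this yields choose(78, 2) = 3003 features.
feature <- function(input_list, index){
  pairwise_dist <- function(mat){
    as.vector(dist(mat))  # dist() returns the lower triangle of the distance matrix
  }
  feature_mat <- t(sapply(input_list[index], pairwise_dist))
  colnames(feature_mat) <- paste0("feature", 1:ncol(feature_mat))
  data.frame(feature_mat, label = as.factor(info$emotion_idx[index]))  # emotion_idx is assumed
}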
source("../../lib/feature.R")
tm_feature_train <- NA
if(run.feature.train){
tm_feature_train <- system.time(dat_train <- feature(fiducial_pt_list, train_idx))
save(dat_train, file="../../output/feature_train.RData")
}else{
load(file="../../output/feature_train.RData")
}
tm_feature_test <- NA
if(run.feature.test){
tm_feature_test <- system.time(dat_test <- feature(fiducial_pt_list, test_idx))
save(dat_test, file="../output/feature_test.RData")
}else{
load(file="../../output/feature_test.RData")
}
Same as the baseline model.
Using cross-validation, we compare the performance of models with different specifications. In the following chunk of code, we tune the parameter cost (the regularization penalty) for the support vector machine (SVM).
source("../../lib/SVM_model.R")
# SVM Cross-validation
cost = c(0.00001, 0.0001, 0.001, 0.01, 0.1)
model_labels_svm = paste("SVM with cost =", cost)
model_labels_svm
# err_svm <- matrix(0, nrow = length(cost), ncol = 2)
# for(i in 1:length(cost)){
#   print(paste("cost =", cost[i]))
#   err_svm[i,] <- CV_SVM(dat_train, K = 5, cost[i])
#   save(err_svm, file="../../output/err_svm.RData")
# }
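CV_SVM() is defined in lib/SVM_model.R, which is not shown here; the following is a hedged sketch of what such a function might look like, assuming e1071::svm with a linear kernel (matching the training chunk below) and a return value of c(mean error, sd of error).
# Sketch of a K-fold CV routine for one cost value (assumptions noted above).
library(e1071)
CV_SVM <- function(dat, K, cost){
  fold_idx <- sample(rep(1:K, length.out = nrow(dat)))  # random fold assignment
  cv_error <- rep(NA, K)
  for(k in 1:K){
    fit <- svm(label ~ ., data = dat[fold_idx != k, ], kernel = "linear", cost = cost)
    pred <- predict(fit, dat[fold_idx == k, ])
    cv_error[k] <- mean(pred != dat$label[fold_idx == k])  # misclassification rate
  }
  c(mean(cv_error), sd(cv_error))
}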
# Load and visualize cross-validation results for the SVM
load("../../output/err_svm.RData")
err_svm <- as.data.frame(err_svm)
colnames(err_svm) <- c("mean_error", "sd_error")
cost = c(0.00001, 0.0001, 0.001, 0.01, 0.1)
err_svm$cost = as.factor(cost)
err_svm %>%
  ggplot(aes(x = cost, y = mean_error, ymin = mean_error - sd_error, ymax = mean_error + sd_error)) +
  geom_crossbar() +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))
Find the best cost for the SVM model, run training and testing, then save the results as RDS files. To save time, this chunk of code is commented out.
# cost_best_svm <- cost[which.min(err_svm[,1])]
# # par_best_svm <- list(cost = cost_best_svm)
# # Training
# tm_train_svm <- NA
# tm_train_svm <- system.time(fit_train_svm <- svm(label ~ ., data = dat_train, kernel = "linear", cost = cost_best_svm))
# # Testing
# tm_test_svm <- NA
# tm_test_svm <- system.time(pred_svm <- predict(fit_train_svm, dat_test))
# # Save results (loaded back in the next chunk)
# saveRDS(tm_train_svm, "../../output/tm_train_svm.RDS")
# saveRDS(tm_test_svm, "../../output/tm_test_svm.RDS")
# saveRDS(fit_train_svm, "../../output/fit_train_svm.RDS")
# saveRDS(pred_svm, "../../output/pred_svm.RDS")
# Load the fitted model, predictions, and timings
tm_train_svm <- readRDS("../../output/tm_train_svm.RDS")
tm_test_svm <- readRDS("../../output/tm_test_svm.RDS")
fit_train_svm <- readRDS("../../output/fit_train_svm.RDS")
pred_svm <- readRDS("../../output/pred_svm.RDS")
# Evaluation
accu_svm <- mean(dat_test$label == pred_svm)
real_label <- dat_test$label %>% as.character() %>% as.numeric()
pred_value_svm <- pred_svm %>% as.character() %>% as.numeric()
confusionMatrix(pred_svm, dat_test$label)  # from the caret package
cost_choose <- cost[which.min(err_svm[,1])]
cat("The accuracy of model: cost =", cost_choose, "is", accu_svm*100, "%.\n")
# AUC for the SVM model
AUC_SVM <- auc_roc(real_label, pred_value_svm)
AUC_SVM
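auc_roc() comes from a sourced helper (likely the mltools package, though that is an assumption). As a hedged cross-check, assuming the label is binary and coded numerically, the AUC could also be computed with pROC:
# Sketch of an alternative AUC computation (assumes a binary label).
library(pROC)
AUC_SVM_check <- as.numeric(auc(roc(real_label, pred_value_svm)))
AUC_SVM_check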
Prediction performance matters, but so do the running times for constructing features and for training the model, especially when computational resources are limited.
Model_performance <- function(time_feature_train, time_feature_test, time_train, time_test){
  cat("Time for constructing training features=", time_feature_train[1], "s \n")
  cat("Time for constructing testing features=", time_feature_test[1], "s \n")
  cat("Time for training model=", time_train[1], "s \n")
  cat("Time for testing model=", time_test[1], "s \n")
}
cat("The accuracy of the SVM model: cost =", cost[which.min(err_svm[,1])], "is", accu_svm*100, "%.\n")
The accuracy of the SVM model: cost = 0.001 is 85.33333 %.
cat("The auc value for the SVM model is",AUC_SVM * 100, "%.\n")
The auc value for the SVM model is 80.36243 %.
Model_performance(tm_feature_train, tm_feature_test, tm_train_svm, tm_test_svm)
Time for constructing training features= NA s
Time for constructing testing features= NA s
Time for training model= 96.392 s
Time for testing model= 8.741 s
Same as the baseline model.
source("../../lib/SVM_model_weighted.R")
# # SVM cross-validation
# cost = c(0.00001, 0.0001, 0.001, 0.01, 0.1, 1)
# err_svm_weight <- matrix(0, nrow = length(cost), ncol = 2)
# for(i in 1:length(cost)){
#   print(paste("cost:", cost[i]))
#   err_svm_weight[i,] <- CV_SVM_weight(dat_train, K = 5, cost[i])
#   saveRDS(err_svm_weight, file="../../output/err_svm_weight.RDS")
# }
# err_svm_weight
# cost_best_svm_weight <- cost[which.min(err_svm_weight[,1])]
# saveRDS(cost_best_svm_weight, file="../../output/cost_best_svm_weight.RDS")
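CV_SVM_weight() lives in lib/SVM_model_weighted.R and is not shown; below is a hedged sketch of the reweighting idea, assuming it passes inverse-frequency class weights to e1071::svm through its class.weights argument.
# Sketch only: class weights inversely proportional to class frequencies.
class_freq <- table(dat_train$label)
class_wts <- setNames(nrow(dat_train) / (length(class_freq) * as.numeric(class_freq)),
                      names(class_freq))
fit_weighted <- svm(label ~ ., data = dat_train, kernel = "linear",
                    cost = 1, class.weights = class_wts)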
From err_svm_weight, we found that the cost had little influence on the mean error or its standard deviation.
cost_best_svm_weight <- readRDS("../../output/cost_best_svm_weight.RDS")
cost_best_svm_weight
[1] 1
Prediction performance matters, but so do the running times for constructing features and for training the model, especially when computational resources are limited.
print(paste("The accuracy of the SVM improved model: cost =", cost_best_svm_weight , "is", accu_svm_weight * 100, "%"))
print(paste("The auc value for tje SVM improved model is", AUC_SVM_weight * 100, "%"))
Model_performace(tm_feature_train, tm_feature_test, tm_train_svm_weight, tm_test_svm_weight)
Feature extraction is the same as in the baseline model, and we improved the features by applying Principal Component Analysis (PCA) for dimensionality reduction.
feature_improved.R should be the wrapper for all your feature engineering functions and options. The function feature_improved() should have options that correspond to different scenarios for your project and produce an R object that contains the features and responses required by all the models you will evaluate later; a sketch follows the input/output summary below.
+ feature_improved.R
+ Input: train/test data
+ Output: an RData file that contains extracted features and corresponding responses
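A minimal sketch of feature_improved(), assuming it reuses the baseline pairwise-distance features and keeps enough principal components to explain about 95% of the variance (the cutoff and structure are assumptions, not the project's exact code):
# Sketch only: baseline features followed by PCA for dimensionality reduction.
feature_improved <- function(input_list, index, var_cutoff = 0.95){
  dat <- feature(input_list, index)  # baseline pairwise-distance features
  pca <- prcomp(dat[, names(dat) != "label"], center = TRUE, scale. = TRUE)
  var_explained <- cumsum(pca$sdev^2) / sum(pca$sdev^2)
  n_comp <- which(var_explained >= var_cutoff)[1]
  data.frame(pca$x[, 1:n_comp], label = dat$label)
}
# Note: in practice the rotation fitted on the training set should be applied
# to the test set (e.g., predict(pca, newdata = ...)) to avoid information leakage.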
tm_feature_train_imp
[1] NA
source("../../lib/SVM_model_weighted.R")
# # SVM cross-validation
# cost = c(0.00001, 0.0001, 0.001, 0.01, 0.1, 1)
# err_svm_imp <- matrix(0, nrow = length(cost), ncol = 2)
# for(i in 1:length(cost)){
#   print(paste("cost:", cost[i]))
#   err_svm_imp[i,] <- CV_SVM_weight(data_train_imp, K = 5, cost[i])
#   saveRDS(err_svm_imp, file="../../output/err_svm_imp.RDS")
# }
err_svm_imp <- readRDS("../../output/err_svm_imp.RDS")
err_svm_imp
cost <- c(0.00001, 0.0001, 0.001, 0.01, 0.1, 1)  # must match the 6-value CV grid above
cost_best_svm_imp <- cost[which.min(err_svm_imp[,1])]
saveRDS(cost_best_svm_imp, file="../../output/cost_best_svm_imp.RDS")
From err_svm_imp, we found that the cost had little influence on the mean error or its standard deviation.
cost_best_svm_imp <- readRDS("../../output/cost_best_svm_imp.RDS")
# Training
tm_train_svm_imp <- NA
# ovun.sample() from the ROSE package oversamples the minority class (p = target proportion)
temp <- ovun.sample(label ~ ., data = data_train_imp, method = "over", p = 0.3)$data
tm_train_svm_imp <- system.time(fit_train_svm_imp <- svm(label ~ ., data = temp, kernel = "linear", cost = cost_best_svm_imp))
#Save and load model
saveRDS(fit_train_svm_imp, "../../output/fit_train_svm_imp.RDS")
saveRDS(tm_train_svm_imp, "../../output/tm_train_svm_imp.RDS")
fit_train_svm_imp <- readRDS("../../output/fit_train_svm_imp.RDS")
tm_train_svm_imp <- readRDS("../../output/tm_train_svm_imp.RDS")
# Testing
tm_test_svm_imp <- NA
tm_test_svm_imp <- system.time(pred_svm_imp <- predict(fit_train_svm_imp, data_test_imp))
saveRDS(tm_test_svm_imp, "../../output/tm_test_svm_imp.RDS")
tm_test_svm_imp <- readRDS("../../output/tm_test_svm_imp.RDS")
# Evaluation
accu_svm_imp <- mean(dat_test$label == pred_svm_imp)
accu_svm_imp
pred_value_svm_imp <- pred_svm_imp %>% as.character() %>% as.numeric()
real_label <- dat_test$label %>% as.character() %>% as.numeric()
confusionMatrix(pred_svm_imp, dat_test$label)
print(paste("The accuracy of model: cost =", cost_best_svm_imp, "is", accu_svm_imp * 100, "%"))
# AUC for the improved SVM model
AUC_SVM_imp <- auc_roc(real_label, pred_value_svm_imp)
AUC_SVM_imp
Prediction performance matters, but so do the running times for constructing features and for training the model, especially when computational resources are limited.
print(paste("The accuracy of the SVM improved model: cost =", cost_best_svm_imp , "is", accu_svm_imp * 100, "%"))
print(paste("The auc value for tje SVM improved model is", AUC_SVM_imp * 100, "%"))
Model_performace(tm_feature_train_imp, tm_feature_test_imp, tm_train_svm_imp, tm_test_svm_imp)
### Reference
Du, S., Tao, Y., & Martinez, A. M. (2014). Compound facial expressions of emotion. *Proceedings of the National Academy of Sciences, 111*(15), E1454-E1462.